Add compile-time option to use writable pagetables.
help
Device-driver domain (physical device access)
+config XEN_WRITABLE_PAGETABLES
+ bool "Use writable pagetables"
+ default n
+ help
+	  Allow the kernel to write L1 pagetable entries directly;
+	  Xen traps, validates, and applies each write instead of the
+	  kernel queueing explicit update hypercalls.  If unsure, say N.
+
endmenu
# Xen's block device backend driver needs 2^12 pages
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_4gb_segments);
-#if 0
+#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_writeable_pagetables);
#endif
static inline unsigned long pmd_val(pmd_t x)
{
	/*
	 * Translate a populated pmd entry from a machine address back to
	 * a pseudo-physical one; a zero (empty) entry passes through.
	 * NOTE(review): the present bit is deliberately not tested here,
	 * presumably so non-present but still-populated entries translate
	 * correctly under writable pagetables — confirm against the
	 * writable-pagetables design.
	 */
	unsigned long entry = x.pmd;
	return entry ? machine_to_phys(entry) : entry;
}
/* Two-level paging only: any use of pgd_val() here is a bug. */
#define pgd_val(x) ({ BUG(); (unsigned long)0; })
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
/* Install a kernel pmd entry pointing at a direct-mapped pte page. */
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
set_pmd(pmd, __pmd(_PAGE_TABLE +
((unsigned long long)page_to_pfn(pte) <<
(unsigned long long) PAGE_SHIFT)));
- flush_page_update_queue(); /* XXXcl flush */
+ flush_page_update_queue();
+ /* XXXcl queue */
}
/*
* Allocate and free page tables.
static inline void pte_free_kernel(pte_t *pte)
{
	/*
	 * Restore write access to the pte page BEFORE returning it to the
	 * allocator.  The original order (free, then remap writable) is a
	 * use-after-free: once free_page() runs, the page may be
	 * reallocated and remapping it read-write races with its new
	 * owner.
	 */
	__make_page_writeable(pte);
	free_page((unsigned long)pte);
}
static inline void pte_free(struct page *pte)
{
	/*
	 * Lowmem pte pages were mapped read-only for Xen pagetable
	 * protection; restore write access through the direct mapping
	 * before release.  Highmem pages (CONFIG_HIGHPTE) have no
	 * permanent kernel mapping to fix up.
	 *
	 * __free_page() must run on ALL paths: keeping it inside the
	 * lowmem-only branch (as in the original hunk) leaks every
	 * highmem pte page when CONFIG_HIGHPTE is enabled.
	 */
#ifdef CONFIG_HIGHPTE
	if (pte < highmem_start_page)
#endif
		__make_page_writeable(phys_to_virt(page_to_pseudophys(pte)));
	__free_page(pte);
}
/* Release a pte page via the mmu_gather, then drain any queued
 * pagetable updates so the hypervisor sees them before the page is
 * reused. */
#define __pte_free_tlb(tlb, pte) \
	do { \
		tlb_remove_page((tlb), (pte)); \
		flush_page_update_queue(); \
		/* XXXcl queue */ \
	} while (0)
/*
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
#ifdef CONFIG_XEN_WRITABLE_PAGETABLES
/*
 * Writable pagetables: store to the entry directly; Xen traps and
 * validates the write.  Arguments are parenthesized so a multi-token
 * actual (e.g. a conditional expression) expands correctly.
 */
#define set_pte(pteptr, pteval)		(*(pteptr) = (pteval))
#define set_pte_atomic(pteptr, pteval)	(*(pteptr) = (pteval))
#else
/* Queue the L1 entry update for a batched hypercall. */
#define set_pte(pteptr, pteval) \
	queue_l1_entry_update((pteptr), (pteval).pte_low)
#define set_pte_atomic(pteptr, pteval) \
	queue_l1_entry_update((pteptr), (pteval).pte_low)
#endif
/*
* (pmds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
{
pte_t pte = *xp;
if (pte.pte_low)
- queue_l1_entry_update(xp, 0);
+ set_pte(xp, __pte_ma(0));
return pte;
}
#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
/* pmd_clear below */
/*
 * A pmd is bad when any flag bit other than USER disagrees with
 * _KERNPG_TABLE.  PRESENT is masked out of both sides — presumably so
 * that non-present but otherwise well-formed entries (writable
 * pagetables) are not reported as bad; confirm against the
 * writable-pagetables design.
 */
#define pmd_bad(x) \
	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != \
	 (_KERNPG_TABLE & ~_PAGE_PRESENT))
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
	/* Drop the RW bit, issuing an update only if it was set. */
	pte_t cur = *ptep;
	if (!pte_write(cur))
		return;
	set_pte(ptep, pte_wrprotect(cur));
}
static inline void ptep_mkdirty(pte_t *ptep)
{
pmd_t p = *(xp); \
set_pmd(xp, __pmd(0)); \
__make_page_writeable((void *)pmd_page_kernel(p)); \
+ /* XXXcl queue */ \
} while (0)
#ifndef CONFIG_DISCONTIGMEM
if ( (unsigned long)va >= VMALLOC_START )
__make_page_readonly(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
+ /* XXXcl queue */
}
static inline void make_page_writeable(void *va)
if ( (unsigned long)va >= VMALLOC_START )
__make_page_writeable(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
+ /* XXXcl queue */
}
static inline void make_pages_readonly(void *va, unsigned int nr)
make_page_readonly(va);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
+ /* XXXcl queue */
}
static inline void make_pages_writeable(void *va, unsigned int nr)
make_page_writeable(va);
va = (void *)((unsigned long)va + PAGE_SIZE);
}
+ /* XXXcl queue */
}
static inline unsigned long arbitrary_virt_to_phys(void *va)